In [1]:
# simple LTI routines are defined in this module
include("lti.jl")
using LTI
# plotting module
using PyPlot
In [2]:
function specErr(s1, s2)
    # greedy spectrum error: match each eigenvalue in s1 to its nearest
    # still-unmatched eigenvalue in s2 and accumulate the squared distances
    n1 = length(s1); n2 = length(s2)
    @assert n1 <= n2
    mask = ones(Bool, n2)
    err = 0.0
    for k in 1:n1
        idxs = find(mask)                         # indices of unmatched eigenvalues in s2
        d, j = findmin(abs(s1[k] - s2[idxs]).^2)
        err += d
        mask[idxs[j]] = false                     # j indexes the masked subarray, so map back to s2
    end
    return err
end
Out[2]:
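For intuition, a small worked usage of the greedy error measure (toy eigenvalues, not taken from a notebook run): each entry of the first spectrum is matched to its nearest still-unmatched entry of the second, so exact matches contribute zero.

specErr([0.9 + 0.1im, 0.5 + 0.0im], [0.9 + 0.1im, 0.5 + 0.0im, 0.1 + 0.0im])  # 0.0: both eigenvalues matched exactly
specErr([0.8 + 0.0im], [0.9 + 0.0im, 0.1 + 0.0im])                            # abs(0.8 - 0.9)^2 = 0.01: nearest match is 0.9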
Dynamics matrix, $A$, is $N$ by $N$ with $K$ slow modes and $N - K$ fast modes. $A$ is generated in one of two ways:

- `A, _, _ = genDyn(N, K, dFast, dSlowLow, dSlowDiff)`, in which case $A$ has $K$ slow eigenvalues drawn uniformly at random between `dSlowLow` and `dSlowLow + dSlowDiff`, and $N - K$ eigenvalues at `dFast`.
- `A, _, _ = genDynRot(N, K, dFast, dSlowLow, dSlowDiff, omegaScale)`, in which case $A$ has $K / 2$ complex-conjugate pairs of slow eigenvalues whose magnitudes are drawn uniformly at random between `dSlowLow` and `dSlowLow + dSlowDiff` and whose phases are drawn uniformly at random between `-omegaScale` and `omegaScale`; the remaining $N - K$ eigenvalues are at `dFast`.

Observation matrix, $C$, is $M$ by $N$ and is generated by sampling $M$ random rows of the identity matrix.

$\Pi$ is the steady-state covariance matrix satisfying the discrete Lyapunov equation $A \Pi A^T + Q = \Pi$.

The system's state $x_t$ and observation $y_t$ evolve according to
$$x_{t + 1} = A x_t + \xi_t,\text{ with }\xi_t \sim \mathcal{N}(0, Q) \\ y_{t} = C x_{t} + \eta_t,\text{ with }\eta_t \sim \mathcal{N}(0, R = rI) \\ x_1 \sim \mathcal{N}(0, \Pi)$$
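The helpers dlyap and runLTI used below come from lti.jl, whose code is not shown. As a point of reference, here is a minimal self-contained sketch of what they presumably do, built directly from the equations above; dlyapNaive and simulateLTI are hypothetical stand-ins, not the module's API.

# Hypothetical helpers for illustration only; the notebook uses dlyap and runLTI from lti.jl.
function dlyapNaive(A, Q)
    # solve A*P*A' + Q = P via the vectorization identity vec(P) = (I - kron(A, A)) \ vec(Q)
    # (dense O(n^6) solve -- fine for a toy n, not for N ~ 1000)
    n = size(A, 1)
    P = reshape((eye(n^2) - kron(A, A)) \ vec(Q), n, n)
    return (P + P') / 2                                  # symmetrize against round-off
end
function simulateLTI(A, C, Q, R, T, P)
    # draw x_1 ~ N(0, P), then iterate x_{t+1} = A*x_t + xi_t and y_t = C*x_t + eta_t
    n = size(A, 1); m = size(C, 1)
    X = zeros(n, T); Y = zeros(m, T)
    qs = real(sqrtm(Q)); rs = real(sqrtm(R))
    X[:, 1] = real(sqrtm(P)) * randn(n)
    for t in 1:T
        Y[:, t] = C * X[:, t] + rs * randn(m)
        if t < T
            X[:, t + 1] = A * X[:, t] + qs * randn(n)
        end
    end
    return X, Y
end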
In [30]:
# parameters
K = 4; N = 996; r = 0.00001; dly = 3
dFast = 0.1; dSlowLow = 0.9; dSlowDiff = 0; omegaScale = pi / 3;
# process (state) noise covariance; observation noise R is set per trial below
Q = eye(N + K) * 1.0;
# dynamics as well as its eigen-decomposition
A, U, D = genDynRot(N + K, K, dFast, dSlowLow, dSlowDiff, omegaScale);
# A = [0.912 0.129 0.0308 -0.0028; -0.1064 0.9521 0.1709 -0.1174; -0.0692 -0.1469 0.9258 -0.0659; 0.0384 0.1269 0.0210 0.9125];
# D, U = eig(A)
# stationary covariance and its sqrt for the states
P = dlyap(A, Q); p = real(sqrtm(P));
# one Ho-Kalman trial: sample M observed coordinates, simulate T steps,
# estimate A with the Ho-Kalman procedure, and score the recovered spectrum
function trial(M, T; guessK=K)
    C = genObsSamp(N + K, M); R = eye(M) * r;
    _, Y = runLTI(A, C, Q, R, int(T), P=P);
    Ap, _ = hoKalman(Y, dly, guessK);
    Dp = eig(Ap)[1];
    Ep = specErr(Dp, D[1:guessK]);
    return Dp, Ep;
end
Out[30]:
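hoKalman is likewise defined in lti.jl and not shown here. For orientation, a textbook Ho-Kalman / stochastic-realization estimate of A from the outputs alone can be sketched as below; hoKalmanSketch is a hypothetical illustration, not the module's implementation. It stacks empirical output cross-covariances into a block Hankel matrix, takes a rank-k SVD, and recovers A from the shift structure of the extended observability factor.

# Hypothetical sketch; the hoKalman in lti.jl may differ in details.
function hoKalmanSketch(Y, dly, k)
    M, T = size(Y)
    # empirical cross-covariances Lam[i] ~ E[y_{t+i} y_t'], i = 1 .. 2*dly - 1
    Lam = [Y[:, 1+i:T] * Y[:, 1:T-i]' / (T - i) for i in 1:2*dly - 1]
    # block Hankel matrix whose (i, j) block is Lam[i + j - 1]
    H = vcat([hcat([Lam[i + j - 1] for j in 1:dly]...) for i in 1:dly]...)
    U, S, V = svd(H)
    # extended observability factor from the top-k singular directions
    O = U[:, 1:k] * diagm(sqrt(S[1:k]))
    # shift invariance: O[1:end-M, :] * A ~ O[M+1:end, :]  (requires dly >= 2)
    return O[1:end-M, :] \ O[M+1:end, :]
end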
In [31]:
M = 50; T = 200;
C = genObsSamp(N + K, M); R = eye(M) * r;
_, Y = runLTI(A, C, Q, R, T, P = P);
Ap, H = hoKalman(Y, dly, K);
# leading eigenvalues of the empirical output covariance
s = eigs(Y * Y' / T; nev = K * 6)[1];
Dp = eig(Ap)[1];
figure(figsize=(6, 2))
subplot(121)
plot(1:length(s), s, "r.-", markersize=5)
xlabel("Index"); ylabel("Eigenvalue")
xticks([]); yticks([]); title("Covariance eigenvalues")
subplot(122)
# true eigenvalues (red circles) vs. Ho-Kalman estimates (black crosses)
plot(real(D), imag(D), "ro", markersize=8, alpha=0.5)
plot(real(Dp), imag(Dp), "kx", linewidth=1, markeredgewidth=1)
xlim([0, 1]); ylim([-0.8, 0.8])
xlabel("Real"); ylabel("Imag")
xticks([0, 1]); yticks([]); title("A's eigenvalues")
Out[31]:
In [5]:
function visRecovery(;M=1, T=10, nTrial=10, guessK=K)
    # pool recovered eigenvalues and spectrum errors over repeated trials
    Dp = Complex128[]
    Es = Float64[]
    for kTrial in 1:nTrial
        dp, e = trial(M, T, guessK=guessK)
        append!(Dp, dp)
        push!(Es, e)
    end
    return Dp, Es
end
Dp, Es = visRecovery(M=20, T=10000, nTrial=100, guessK=K);
using Gadfly    # layer/Geom/Scale below come from Gadfly; qualify plot to avoid the clash with PyPlot.plot
Gadfly.plot(
    layer(x=real(D), y=imag(D), Geom.point, Theme(default_color=color("red"), default_point_size=1mm)),
    layer(x=real(Dp), y=imag(Dp), Geom.point, Theme(default_point_size=0.5mm)),
    Scale.x_continuous(minvalue=0, maxvalue=1), Scale.y_continuous(minvalue=-1, maxvalue=1))
Spectrum error distribution using the greedy error measure
In [468]:
print(mean(Es))
Gadfly.plot(x=Es, Geom.density)
Out[468]:
In [473]:
Ms = (K - 2):4:100; dly = 10;
# Ts = 10:4:100;
Ts = logspace(log10(10), log10(2000), 25);
nTrial = 10;
# spectrum error for every (number of observed coordinates, trace length) pair
Es = zeros(length(Ms), length(Ts), nTrial)
for (kM, M) in enumerate(Ms)
    for (kT, T) in enumerate(Ts)
        for kTrial in 1:nTrial
            tmp = trial(M, T, guessK=K)
            Es[kM, kT, kTrial] = tmp[2]
        end
    end
end
In [474]:
using PyPlot
# mean spectrum error over trials, normalized by the total power of the K slow eigenvalues
imshow(squeeze(mean(Es, 3), 3) / sum(abs(D[1:K]).^2), interpolation="nearest", aspect="auto", origin="lower",
       extent=[minimum(log10(Ts)), maximum(log10(Ts)), minimum(Ms), maximum(Ms)], vmin=0, vmax=2)
xlabel("log10(T)"); ylabel("M"); colorbar()
savefig("hankel.delay.$(dly).eps")